Still more cleanup and moving to 2.6.13 base
authordjm@kirby.fc.hp.com <djm@kirby.fc.hp.com>
Thu, 1 Sep 2005 17:09:27 +0000 (11:09 -0600)
committerdjm@kirby.fc.hp.com <djm@kirby.fc.hp.com>
Thu, 1 Sep 2005 17:09:27 +0000 (11:09 -0600)
31 files changed:
xen/arch/ia64/hpsimserial.c
xen/arch/ia64/linux-xen/README.origin
xen/arch/ia64/linux-xen/hpsim_ssc.h [new file with mode: 0644]
xen/arch/ia64/process.c
xen/arch/ia64/xenmisc.c
xen/include/asm-ia64/config.h
xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h
xen/include/asm-ia64/linux-xen/asm/hpsim_ssc.h [deleted file]
xen/include/asm-ia64/linux-xen/asm/ia64regs.h
xen/include/asm-ia64/linux-xen/asm/io.h
xen/include/asm-ia64/linux-xen/asm/kregs.h
xen/include/asm-ia64/linux-xen/asm/mca_asm.h
xen/include/asm-ia64/linux-xen/asm/page.h
xen/include/asm-ia64/linux-xen/asm/pgalloc.h
xen/include/asm-ia64/linux-xen/asm/processor.h
xen/include/asm-ia64/linux-xen/asm/spinlock.h
xen/include/asm-ia64/linux-xen/asm/system.h
xen/include/asm-ia64/linux-xen/asm/tlbflush.h
xen/include/asm-ia64/linux-xen/asm/types.h
xen/include/asm-ia64/linux-xen/asm/uaccess.h
xen/include/asm-ia64/linux-xen/linux/cpumask.h
xen/include/asm-ia64/linux-xen/linux/hardirq.h
xen/include/asm-ia64/linux-xen/linux/interrupt.h
xen/include/asm-ia64/xengcc_intrin.h [new file with mode: 0644]
xen/include/asm-ia64/xenia64regs.h [new file with mode: 0644]
xen/include/asm-ia64/xenkregs.h [new file with mode: 0644]
xen/include/asm-ia64/xenpage.h [new file with mode: 0644]
xen/include/asm-ia64/xenprocessor.h
xen/include/asm-ia64/xenspinlock.h [new file with mode: 0644]
xen/include/asm-ia64/xensystem.h
xen/include/asm-ia64/xentypes.h [new file with mode: 0644]

index 3e87aa3332d436e0e682ea7b954c44d072ff4711..6fa50fb859cfedd81e1d9f12b6003e7c99bcc0cb 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/config.h>
 #include <xen/sched.h>
 #include <xen/serial.h>
-#include <asm/hpsim_ssc.h>
+#include "hpsim_ssc.h"
 
 static void hp_ski_putc(struct serial_port *port, char c)
 {
index ff15575ede49883247f9f01a8444441cce26e106..a8c592871a1aa0b7d9a0e0fd5aca5859409c132c 100644 (file)
@@ -7,6 +7,7 @@ to future versions of the corresponding Linux files.
 efi.c          -> linux/arch/ia64/kernel/efi.c
 entry.h                -> linux/arch/ia64/kernel/entry.h
 entry.S                -> linux/arch/ia64/kernel/entry.S
+hpsim_ssc.h    -> linux/arch/ia64/hp/sim/hpsim_ssc.h
 irq_ia64.c     -> linux/arch/ia64/kernel/irq_ia64.c
 minstate.h     -> linux/arch/ia64/kernel/minstate.h
 mm_contig.c    -> linux/arch/ia64/mm/contig.c
diff --git a/xen/arch/ia64/linux-xen/hpsim_ssc.h b/xen/arch/ia64/linux-xen/hpsim_ssc.h
new file mode 100644 (file)
index 0000000..c3f36e7
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Platform dependent support for HP simulator.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
+ */
+#ifndef _IA64_PLATFORM_HPSIM_SSC_H
+#define _IA64_PLATFORM_HPSIM_SSC_H
+
+/* Simulator system calls: */
+
+#define SSC_CONSOLE_INIT               20
+#define SSC_GETCHAR                    21
+#define SSC_PUTCHAR                    31
+#define SSC_CONNECT_INTERRUPT          58
+#define SSC_GENERATE_INTERRUPT         59
+#define SSC_SET_PERIODIC_INTERRUPT     60
+#define SSC_GET_RTC                    65
+#define SSC_EXIT                       66
+#define SSC_LOAD_SYMBOLS               69
+#define SSC_GET_TOD                    74
+#define SSC_CTL_TRACE                  76
+
+#define SSC_NETDEV_PROBE               100
+#define SSC_NETDEV_SEND                        101
+#define SSC_NETDEV_RECV                        102
+#define SSC_NETDEV_ATTACH              103
+#define SSC_NETDEV_DETACH              104
+
+/*
+ * Simulator system call.
+ */
+extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
+
+#ifdef XEN
+/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
+ * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
+#define SSC_OPEN                       50
+#define SSC_CLOSE                      51
+#define SSC_READ                       52
+#define SSC_WRITE                      53
+#define SSC_GET_COMPLETION             54
+#define SSC_WAIT_COMPLETION            55
+
+#define SSC_WRITE_ACCESS               2
+#define SSC_READ_ACCESS                        1
+
+struct ssc_disk_req {
+       unsigned long addr;
+       unsigned long len;
+};
+#endif
+
+#endif /* _IA64_PLATFORM_HPSIM_SSC_H */
index 5e80ab041ba58886d6f6b2aca90f7ac512daf1cc..ff66c3e51bd524bf0f98bfbdd55116240d03a853 100644 (file)
@@ -28,8 +28,8 @@
 #include <asm/privop.h>
 #include <asm/vcpu.h>
 #include <asm/ia64_int.h>
-#include <asm/hpsim_ssc.h>
 #include <asm/dom_fw.h>
+#include "hpsim_ssc.h"
 
 extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
 extern struct ia64_sal_retval pal_emulator_static(UINT64);
index 701614130e798970baff83e59f72a45d862da34b..55f87e45d8e2ab762f9eeb6a47e3f89177ff4b9d 100644 (file)
@@ -176,6 +176,34 @@ void free_page_type(struct pfn_info *page, unsigned int type)
        dummy();
 }
 
+///////////////////////////////
+//// misc memory stuff
+///////////////////////////////
+
+unsigned long __get_free_pages(unsigned int mask, unsigned int order)
+{
+       void *p = alloc_xenheap_pages(order);
+
+       memset(p,0,PAGE_SIZE<<order);
+       return (unsigned long)p;
+}
+
+void __free_pages(struct page *page, unsigned int order)
+{
+       if (order) BUG();
+       free_xenheap_page(page);
+}
+
+void *pgtable_quicklist_alloc(void)
+{
+       return alloc_xenheap_pages(0);
+}
+
+void pgtable_quicklist_free(void *pgtable_entry)
+{
+       free_xenheap_page(pgtable_entry);
+}
+
 ///////////////////////////////
 // from arch/ia64/traps.c
 ///////////////////////////////
index 3cc3c9675017b1ec5400579a91cd084591d1de6c..90763d515fe3877a275a5a7d3fa1d52fc2a728f4 100644 (file)
@@ -168,7 +168,9 @@ struct page;
 #define ____cacheline_aligned_in_smp
 #define ____cacheline_maxaligned_in_smp
 
+#ifndef __ASSEMBLY__
 #include "asm/types.h" // for u64
+#endif
 
 // warning: unless search_extable is declared, the return value gets
 // truncated to 32-bits, causing a very strange error in privop handling
index a87d8588f7a4a6481b589b37c1e064db294bf38f..1339f9ebf9f77034792e48c7793bf12c845dd47f 100644 (file)
@@ -133,13 +133,17 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
        ia64_intri_res;                                                         \
 })
 
-#define ia64_popcnt(x)                                         \
-({                                                             \
+#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+# define ia64_popcnt(x)                __builtin_popcountl(x)
+#else
+# define ia64_popcnt(x)                                                \
+  ({                                                           \
        __u64 ia64_intri_res;                                   \
        asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
                                                                \
        ia64_intri_res;                                         \
-})
+  })
+#endif
 
 #define ia64_getf_exp(x)                                       \
 ({                                                             \
@@ -368,66 +372,6 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_mf()      asm volatile ("mf" ::: "memory")
 #define ia64_mfa()     asm volatile ("mf.a" ::: "memory")
 
-#ifdef CONFIG_VTI
-/*
- * Flushrs instruction stream.
- */
-#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
-
-#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
-
-#define ia64_get_rsc()                          \
-({                                  \
-    unsigned long val;                     \
-    asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory");  \
-    val;                               \
-})
-
-#define ia64_set_rsc(val)                       \
-    asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
-
-#define ia64_get_bspstore()     \
-({                                  \
-    unsigned long val;                     \
-    asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory");  \
-    val;                               \
-})
-
-#define ia64_set_bspstore(val)                       \
-    asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
-
-#define ia64_get_rnat()     \
-({                                  \
-    unsigned long val;                     \
-    asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory");  \
-    val;                               \
-})
-
-#define ia64_set_rnat(val)                       \
-    asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
-
-#define ia64_ttag(addr)                                                        \
-({                                                                             \
-       __u64 ia64_intri_res;                                                   \
-       asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));        \
-       ia64_intri_res;                                                         \
-})
-
-#define ia64_get_dcr()                          \
-({                                      \
-    __u64 result;                               \
-    asm volatile ("mov %0=cr.dcr" : "=r"(result) : );           \
-    result;                                 \
-})
-
-#define ia64_set_dcr(val)                           \
-({                                      \
-    asm volatile ("mov cr.dcr=%0" :: "r"(val) );            \
-})
-
-#endif // CONFIG_VTI
-
-
 #define ia64_invala() asm volatile ("invala" ::: "memory")
 
 #define ia64_thash(addr)                                                       \
@@ -654,4 +598,8 @@ do {                                                                \
                      :: "r"((x)) : "p6", "p7", "memory");      \
 } while (0)
 
+#ifdef XEN
+#include <asm/xengcc_intrin.h>
+#endif
+
 #endif /* _ASM_IA64_GCC_INTRIN_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/hpsim_ssc.h b/xen/include/asm-ia64/linux-xen/asm/hpsim_ssc.h
deleted file mode 100644 (file)
index c3f36e7..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Platform dependent support for HP simulator.
- *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
- */
-#ifndef _IA64_PLATFORM_HPSIM_SSC_H
-#define _IA64_PLATFORM_HPSIM_SSC_H
-
-/* Simulator system calls: */
-
-#define SSC_CONSOLE_INIT               20
-#define SSC_GETCHAR                    21
-#define SSC_PUTCHAR                    31
-#define SSC_CONNECT_INTERRUPT          58
-#define SSC_GENERATE_INTERRUPT         59
-#define SSC_SET_PERIODIC_INTERRUPT     60
-#define SSC_GET_RTC                    65
-#define SSC_EXIT                       66
-#define SSC_LOAD_SYMBOLS               69
-#define SSC_GET_TOD                    74
-#define SSC_CTL_TRACE                  76
-
-#define SSC_NETDEV_PROBE               100
-#define SSC_NETDEV_SEND                        101
-#define SSC_NETDEV_RECV                        102
-#define SSC_NETDEV_ATTACH              103
-#define SSC_NETDEV_DETACH              104
-
-/*
- * Simulator system call.
- */
-extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
-
-#ifdef XEN
-/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
- * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
-#define SSC_OPEN                       50
-#define SSC_CLOSE                      51
-#define SSC_READ                       52
-#define SSC_WRITE                      53
-#define SSC_GET_COMPLETION             54
-#define SSC_WAIT_COMPLETION            55
-
-#define SSC_WRITE_ACCESS               2
-#define SSC_READ_ACCESS                        1
-
-struct ssc_disk_req {
-       unsigned long addr;
-       unsigned long len;
-};
-#endif
-
-#endif /* _IA64_PLATFORM_HPSIM_SSC_H */
index d30d54dacc6c530167cbc8c3537090ede2d2dd18..a90db1022adbb72f6e31a7b40a3cef015fd51d6b 100644 (file)
 #define _IA64_REG_CR_LRR0      4176
 #define _IA64_REG_CR_LRR1      4177
 
-#ifdef  CONFIG_VTI
-#define IA64_REG_CR_DCR   0
-#define IA64_REG_CR_ITM   1
-#define IA64_REG_CR_IVA   2
-#define IA64_REG_CR_PTA   8
-#define IA64_REG_CR_IPSR  16
-#define IA64_REG_CR_ISR   17
-#define IA64_REG_CR_IIP   19
-#define IA64_REG_CR_IFA   20
-#define IA64_REG_CR_ITIR  21
-#define IA64_REG_CR_IIPA  22
-#define IA64_REG_CR_IFS   23
-#define IA64_REG_CR_IIM   24
-#define IA64_REG_CR_IHA   25
-#define IA64_REG_CR_LID   64
-#define IA64_REG_CR_IVR   65
-#define IA64_REG_CR_TPR   66
-#define IA64_REG_CR_EOI   67
-#define IA64_REG_CR_IRR0  68
-#define IA64_REG_CR_IRR1  69
-#define IA64_REG_CR_IRR2  70
-#define IA64_REG_CR_IRR3  71
-#define IA64_REG_CR_ITV   72
-#define IA64_REG_CR_PMV   73
-#define IA64_REG_CR_CMCV  74
-#define IA64_REG_CR_LRR0  80
-#define IA64_REG_CR_LRR1  81
-#endif  //  CONFIG_VTI
-
 /* Indirect Registers for getindreg() and setindreg() */
 
 #define _IA64_REG_INDR_CPUID   9000    /* getindreg only */
 #define _IA64_REG_INDR_PMD     9005
 #define _IA64_REG_INDR_RR      9006
 
+#ifdef XEN
+#include <asm/xenia64regs.h>
+#endif
+
 #endif /* _ASM_IA64_IA64REGS_H */
index 74fec0c5170230b7fb636c66ddc21a6a5c19a8b6..883690df6dd0c4faaf523e8996167b9a0b7d38c1 100644 (file)
@@ -124,14 +124,6 @@ static inline void ___ia64_mmiowb(void)
        ia64_mfa();
 }
 
-static inline const unsigned long
-__ia64_get_io_port_base (void)
-{
-       extern unsigned long ia64_iobase;
-
-       return ia64_iobase;
-}
-
 static inline void*
 __ia64_mk_io_addr (unsigned long port)
 {
index 28b6501cbf601525ee8140f0f7d095117d2ac03d..8e0795f0c8af678c169f498816f6acadc57ee18f 100644 (file)
  */
 #define IA64_TR_KERNEL         0       /* itr0, dtr0: maps kernel image (code & data) */
 #define IA64_TR_PALCODE                1       /* itr1: maps PALcode as required by EFI */
-#ifdef CONFIG_VTI
-#define IA64_TR_XEN_IN_DOM     6       /* itr6, dtr6: Double mapping for xen image in domain space */
-#endif // CONFIG_VTI
 #define IA64_TR_PERCPU_DATA    1       /* dtr1: percpu data */
 #define IA64_TR_CURRENT_STACK  2       /* dtr2: maps kernel's memory- & register-stacks */
-#ifdef XEN
-#define IA64_TR_SHARED_INFO    3       /* dtr3: page shared with domain */
-#define        IA64_TR_VHPT            4       /* dtr4: vhpt */
-#define IA64_TR_ARCH_INFO      5
-#ifdef CONFIG_VTI
-#define IA64_TR_VHPT_IN_DOM    5       /* dtr5: Double mapping for vhpt table in domain space */
-#define IA64_TR_RR7_SWITCH_STUB        7       /* dtr7: mapping for rr7 switch stub */
-#define IA64_TEMP_PHYSICAL     8       /* itr8, dtr8: temp mapping for guest physical memory 256M */
-#endif // CONFIG_VTI
-#endif
 
 /* Processor status register bits: */
 #define IA64_PSR_BE_BIT                1
@@ -79,9 +66,6 @@
 #define IA64_PSR_ED_BIT                43
 #define IA64_PSR_BN_BIT                44
 #define IA64_PSR_IA_BIT                45
-#ifdef CONFIG_VTI
-#define IA64_PSR_VM_BIT                46
-#endif // CONFIG_VTI
 
 /* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
    execve().  Only list flags here that need to be cleared/set for BOTH clone2() and
 #define IA64_PSR_ED    (__IA64_UL(1) << IA64_PSR_ED_BIT)
 #define IA64_PSR_BN    (__IA64_UL(1) << IA64_PSR_BN_BIT)
 #define IA64_PSR_IA    (__IA64_UL(1) << IA64_PSR_IA_BIT)
-#ifdef CONFIG_VTI
-#define IA64_PSR_VM    (__IA64_UL(1) << IA64_PSR_VM_BIT)
-#endif // CONFIG_VTI
 
 /* User mask bits: */
 #define IA64_PSR_UM    (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
 #define IA64_ISR_CODE_PROBEF   5
 
 #ifdef XEN
-/* Interruption Function State */
-#define IA64_IFS_V_BIT         63
-#define IA64_IFS_V     (__IA64_UL(1) << IA64_IFS_V_BIT)
-
-/* Page Table Address */
-#define IA64_PTA_VE_BIT 0
-#define IA64_PTA_SIZE_BIT 2
-#define IA64_PTA_VF_BIT 8
-#define IA64_PTA_BASE_BIT 15
-
-#define IA64_PTA_VE     (__IA64_UL(1) << IA64_PTA_VE_BIT)
-#define IA64_PTA_SIZE   (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
-#define IA64_PTA_VF     (__IA64_UL(1) << IA64_PTA_VF_BIT)
-#define IA64_PTA_BASE   (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
+#include <asm/xenkregs.h>
 #endif
 
 #endif /* _ASM_IA64_kREGS_H */
index c95ee4fcbc5eecd39569f4bee6bec4183fce092d..ea5933ecc22f344f104d2098903d9f8c6919847d 100644 (file)
 #ifdef XEN
 #define INST_VA_TO_PA(addr)                                                    \
        dep     addr    = 0, addr, 60, 4
-#else // XEN
+#else
 #define INST_VA_TO_PA(addr)                                                    \
        dep     addr    = 0, addr, 61, 3
-#endif // XEN
+#endif
 /*
  * This macro converts a data virtual address to a physical address
  * Right now for simulation purposes the virtual addresses are
 #define DATA_PA_TO_VA(addr,temp)                                                       \
        mov     temp    = 0xf   ;;                                                      \
        dep     addr    = temp, addr, 60, 4
-#else // XEN
+#else
 #define DATA_PA_TO_VA(addr,temp)                                                       \
        mov     temp    = 0x7   ;;                                                      \
        dep     addr    = temp, addr, 61, 3
-#endif // XEN
+#endif
 
+#ifdef XEN
+//FIXME LATER
+#else
 #define GET_THIS_PADDR(reg, var)               \
        mov     reg = IA64_KR(PER_CPU_DATA);;   \
         addl   reg = THIS_CPU(var), reg
+#endif
 
 /*
  * This macro jumps to the instruction at the given virtual address
index 4a9d3ddb2262d465c610bc39f53abe5f3b0401cb..f49a8a0bbf9d810c2e39abfa8706d1d047d25c7b 100644 (file)
@@ -32,7 +32,6 @@
 #define PAGE_ALIGN(addr)       (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
 
 #define PERCPU_PAGE_SHIFT      16      /* log2() of max. size of per-CPU area */
-
 #define PERCPU_PAGE_SIZE       (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
 
 #define RGN_MAP_LIMIT  ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)      /* per region addr limit */
@@ -96,15 +95,9 @@ extern int ia64_pfn_valid (unsigned long pfn);
 #endif
 
 #ifndef CONFIG_DISCONTIGMEM
-#ifdef XEN
-# define pfn_valid(pfn)                (0)
-# define page_to_pfn(_page)    ((unsigned long)((_page) - frame_table))
-# define pfn_to_page(_pfn)     (frame_table + (_pfn))
-#else
 # define pfn_valid(pfn)                (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
 # define page_to_pfn(page)     ((unsigned long) (page - mem_map))
 # define pfn_to_page(pfn)      (mem_map + (pfn))
-#endif
 #else
 extern struct page *vmem_map;
 extern unsigned long max_low_pfn;
@@ -116,11 +109,6 @@ extern unsigned long max_low_pfn;
 #define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
 #define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 
-#ifdef XEN
-#define page_to_virt(_page)    phys_to_virt(page_to_phys(_page))
-#define phys_to_page(kaddr)    pfn_to_page(((kaddr) >> PAGE_SHIFT))
-#endif
-
 typedef union ia64_va {
        struct {
                unsigned long off : 61;         /* intra-region offset */
@@ -136,23 +124,8 @@ typedef union ia64_va {
  * expressed in this way to ensure they result in a single "dep"
  * instruction.
  */
-#ifdef XEN
-typedef union xen_va {
-       struct {
-               unsigned long off : 60;
-               unsigned long reg : 4;
-       } f;
-       unsigned long l;
-       void *p;
-} xen_va;
-
-// xen/drivers/console.c uses __va in a declaration (should be fixed!)
-#define __pa(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
-#define __va(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
-#else
 #define __pa(x)                ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
 #define __va(x)                ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
-#endif
 
 #define REGION_NUMBER(x)       ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
 #define REGION_OFFSET(x)       ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
@@ -164,9 +137,9 @@ typedef union xen_va {
 # define htlbpage_to_page(x)   (((unsigned long) REGION_NUMBER(x) << 61)                       \
                                 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
 # define HUGETLB_PAGE_ORDER    (HPAGE_SHIFT - PAGE_SHIFT)
-# define is_hugepage_only_range(addr, len)             \
+# define is_hugepage_only_range(mm, addr, len)         \
         (REGION_NUMBER(addr) == REGION_HPAGE &&        \
-         REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
+         REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE)
 extern unsigned int hpage_shift;
 #endif
 
@@ -224,15 +197,15 @@ get_order (unsigned long size)
 # define __pgprot(x)   (x)
 #endif /* !STRICT_MM_TYPECHECKS */
 
-#ifdef XEN
-#define PAGE_OFFSET                    __IA64_UL_CONST(0xf000000000000000)
-#else
 #define PAGE_OFFSET                    __IA64_UL_CONST(0xe000000000000000)
-#endif
 
 #define VM_DATA_DEFAULT_FLAGS          (VM_READ | VM_WRITE |                                   \
                                         VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |                \
                                         (((current->personality & READ_IMPLIES_EXEC) != 0)     \
                                          ? VM_EXEC : 0))
 
+#ifdef XEN
+#include <asm/xenpage.h>
+#endif
+
 #endif /* _ASM_IA64_PAGE_H */
index 09c98cde6e32964dff8082125d6d5f6b3968b6fa..96a405de96929074c16b3e9cbb91b6c31ce68b0c 100644 (file)
 #include <linux/threads.h>
 
 #include <asm/mmu_context.h>
-#include <asm/processor.h>
 
-/*
- * Very stupidly, we used to get new pgd's and pmd's, init their contents
- * to point to the NULL versions of the next level page table, later on
- * completely re-init them the same way, then free them up.  This wasted
- * a lot of work and caused unnecessary memory traffic.  How broken...
- * We fix this by caching them.
- */
-#define pgd_quicklist          (local_cpu_data->pgd_quick)
-#define pmd_quicklist          (local_cpu_data->pmd_quick)
-#define pgtable_cache_size     (local_cpu_data->pgtable_cache_sz)
+#ifndef XEN
+DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
+#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
+DECLARE_PER_CPU(long, __pgtable_quicklist_size);
+#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
+
+static inline long pgtable_quicklist_total_size(void)
+{
+       long ql_size = 0;
+       int cpuid;
+
+       for_each_online_cpu(cpuid) {
+               ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
+       }
+       return ql_size;
+}
 
-static inline pgd_t*
-pgd_alloc_one_fast (struct mm_struct *mm)
+static inline void *pgtable_quicklist_alloc(void)
 {
        unsigned long *ret = NULL;
 
        preempt_disable();
 
-       ret = pgd_quicklist;
+       ret = pgtable_quicklist;
        if (likely(ret != NULL)) {
-               pgd_quicklist = (unsigned long *)(*ret);
+               pgtable_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
-               --pgtable_cache_size;
-       } else
-               ret = NULL;
-
-       preempt_enable();
+               --pgtable_quicklist_size;
+               preempt_enable();
+       } else {
+               preempt_enable();
+               ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+       }
 
-       return (pgd_t *) ret;
+       return ret;
 }
 
-static inline pgd_t*
-pgd_alloc (struct mm_struct *mm)
+static inline void pgtable_quicklist_free(void *pgtable_entry)
 {
-       /* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
-       pgd_t *pgd = pgd_alloc_one_fast(mm);
-
-       if (unlikely(pgd == NULL)) {
-#ifdef XEN
-               pgd = (pgd_t *)alloc_xenheap_page();
-               memset(pgd,0,PAGE_SIZE);
-#else
-               pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-#endif
+#ifdef CONFIG_NUMA
+       unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
+
+       if (unlikely(nid != numa_node_id())) {
+               free_page((unsigned long)pgtable_entry);
+               return;
        }
-       return pgd;
-}
+#endif
 
-static inline void
-pgd_free (pgd_t *pgd)
-{
        preempt_disable();
-       *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-       pgd_quicklist = (unsigned long *) pgd;
-       ++pgtable_cache_size;
+       *(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
+       pgtable_quicklist = (unsigned long *)pgtable_entry;
+       ++pgtable_quicklist_size;
        preempt_enable();
 }
+#endif
 
-static inline void
-pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       pud_val(*pud_entry) = __pa(pmd);
+       return pgtable_quicklist_alloc();
 }
 
-static inline pmd_t*
-pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
+static inline void pgd_free(pgd_t * pgd)
 {
-       unsigned long *ret = NULL;
-
-       preempt_disable();
-
-       ret = (unsigned long *)pmd_quicklist;
-       if (likely(ret != NULL)) {
-               pmd_quicklist = (unsigned long *)(*ret);
-               ret[0] = 0;
-               --pgtable_cache_size;
-       }
-
-       preempt_enable();
-
-       return (pmd_t *)ret;
+       pgtable_quicklist_free(pgd);
 }
 
-static inline pmd_t*
-pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
+static inline void
+pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
 {
-#ifdef XEN
-       pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
-       memset(pmd,0,PAGE_SIZE);
-#else
-       pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-#endif
+       pud_val(*pud_entry) = __pa(pmd);
+}
 
-       return pmd;
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+       return pgtable_quicklist_alloc();
 }
 
-static inline void
-pmd_free (pmd_t *pmd)
+static inline void pmd_free(pmd_t * pmd)
 {
-       preempt_disable();
-       *(unsigned long *)pmd = (unsigned long) pmd_quicklist;
-       pmd_quicklist = (unsigned long *) pmd;
-       ++pgtable_cache_size;
-       preempt_enable();
+       pgtable_quicklist_free(pmd);
 }
 
 #define __pmd_free_tlb(tlb, pmd)       pmd_free(pmd)
 
 static inline void
-pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte)
+pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
 {
        pmd_val(*pmd_entry) = page_to_phys(pte);
 }
 
 static inline void
-pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
+pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
 {
        pmd_val(*pmd_entry) = __pa(pte);
 }
 
-static inline struct page *
-pte_alloc_one (struct mm_struct *mm, unsigned long addr)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+                                        unsigned long addr)
 {
-#ifdef XEN
-       struct page *pte = alloc_xenheap_page();
-       memset(pte,0,PAGE_SIZE);
-#else
-       struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-#endif
-
-       return pte;
+       return virt_to_page(pgtable_quicklist_alloc());
 }
 
-static inline pte_t *
-pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+                                         unsigned long addr)
 {
-#ifdef XEN
-       pte_t *pte = (pte_t *)alloc_xenheap_page();
-       memset(pte,0,PAGE_SIZE);
-#else
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-#endif
-
-       return pte;
+       return pgtable_quicklist_alloc();
 }
 
-static inline void
-pte_free (struct page *pte)
+static inline void pte_free(struct page *pte)
 {
-#ifdef XEN
-       free_xenheap_page(pte);
-#else
-       __free_page(pte);
-#endif
+       pgtable_quicklist_free(page_address(pte));
 }
 
-static inline void
-pte_free_kernel (pte_t *pte)
+static inline void pte_free_kernel(pte_t * pte)
 {
-#ifdef XEN
-       free_xenheap_page((unsigned long) pte);
-#else
-       free_page((unsigned long) pte);
-#endif
+       pgtable_quicklist_free(pte);
 }
 
-#define __pte_free_tlb(tlb, pte)       tlb_remove_page((tlb), (pte))
+#define __pte_free_tlb(tlb, pte)       pte_free(pte)
 
-extern void check_pgt_cache (void);
+extern void check_pgt_cache(void);
 
-#endif /* _ASM_IA64_PGALLOC_H */
+#endif                         /* _ASM_IA64_PGALLOC_H */
index a35d69a9a8df0041f24f8ed2adc689fec69c19b3..4b7f22da944455f8cc77926442f218f095186acd 100644 (file)
  */
 #define TASK_SIZE              (current->thread.task_size)
 
-/*
- * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
- * address-space MM.  Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
- * because the kernel may have installed helper-mappings above TASK_SIZE.  For example,
- * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
- */
-#define MM_VM_SIZE(mm)         DEFAULT_TASK_SIZE
-
 /*
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
 #ifdef CONFIG_NUMA
 #include <asm/nodedata.h>
 #endif
+
 #ifdef XEN
 #include <asm/xenprocessor.h>
-#endif
-
-#ifndef XEN
+#else
 /* like above but expressed as bitfields for more efficient access: */
 struct ia64_psr {
        __u64 reserved0 : 1;
@@ -150,9 +141,6 @@ struct cpuinfo_ia64 {
        __u64 nsec_per_cyc;     /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
        __u64 unimpl_va_mask;   /* mask of unimplemented virtual address bits (from PAL) */
        __u64 unimpl_pa_mask;   /* mask of unimplemented physical address bits (from PAL) */
-       __u64 *pgd_quick;
-       __u64 *pmd_quick;
-       __u64 pgtable_cache_sz;
        __u64 itc_freq;         /* frequency of ITC counter */
        __u64 proc_freq;        /* frequency of processor */
        __u64 cyc_per_usec;     /* itc_freq/1000000 */
@@ -190,22 +178,6 @@ struct cpuinfo_ia64 {
 
 DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
 
-typedef union {
-       struct {
-               __u64 kr0;
-               __u64 kr1;
-               __u64 kr2;
-               __u64 kr3;
-               __u64 kr4;
-               __u64 kr5;
-               __u64 kr6;
-               __u64 kr7;
-       };
-       __u64 _kr[8];
-} cpu_kr_ia64_t;
-
-DECLARE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
-
 /*
  * The "local" data variable.  It refers to the per-CPU data of the currently executing
  * CPU, much like "current" points to the per-task data of the currently executing task.
@@ -435,7 +407,10 @@ extern void ia64_setreg_unknown_kr (void);
  * task_struct at this point.
  */
 
-/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
+/*
+ * Return TRUE if task T owns the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
 #ifndef XEN
 #define ia64_is_local_fpu_owner(t)                                                             \
 ({                                                                                             \
@@ -445,7 +420,10 @@ extern void ia64_setreg_unknown_kr (void);
 })
 #endif
 
-/* Mark task T as owning the fph partition of the CPU we're running on. */
+/*
+ * Mark task T as owning the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
 #define ia64_set_local_fpu_owner(t) do {                                               \
        struct task_struct *__ia64_slfo_task = (t);                                     \
        __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();                     \
index 608215825581e71cae31c198951c75260b309f28..c2d32e7a4868ca6664ace8df8bc63ea1755439db 100644 (file)
@@ -120,35 +120,6 @@ do {                                                                                       \
 #define _raw_spin_trylock(x)   (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
 #define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
 
-#ifdef XEN
-/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
- * reentered recursively on the same CPU. All critical regions that may form
- * part of a recursively-nested set must be protected by these forms. If there
- * are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
- */
-#define _raw_spin_lock_recursive(_lock)            \
-    do {                                           \
-        int cpu = smp_processor_id();              \
-        if ( likely((_lock)->recurse_cpu != cpu) ) \
-        {                                          \
-            spin_lock(_lock);                      \
-            (_lock)->recurse_cpu = cpu;            \
-        }                                          \
-        (_lock)->recurse_cnt++;                    \
-    } while ( 0 )
-
-#define _raw_spin_unlock_recursive(_lock)          \
-    do {                                           \
-        if ( likely(--(_lock)->recurse_cnt == 0) ) \
-        {                                          \
-            (_lock)->recurse_cpu = -1;             \
-            spin_unlock(_lock);                    \
-        }                                          \
-    } while ( 0 )
-#endif
-
 typedef struct {
        volatile unsigned int read_counter      : 31;
        volatile unsigned int write_lock        :  1;
@@ -238,4 +209,7 @@ do {                                                                                \
        clear_bit(31, (x));                                                             \
 })
 
+#ifdef XEN
+#include <asm/xenspinlock.h>
+#endif
 #endif /*  _ASM_IA64_SPINLOCK_H */
index d8d9dd20db54095f5df69965d6e66a38c1b583b9..e77f67225f4195db67a90824a7c8eb0f121a76d6 100644 (file)
 #include <asm/page.h>
 #include <asm/pal.h>
 #include <asm/percpu.h>
-#ifdef XEN
-#include <asm/xensystem.h>
-#endif
 
 #define GATE_ADDR              __IA64_UL_CONST(0xa000000000000000)
 /*
  * 0xa000000000000000+2*PERCPU_PAGE_SIZE
  * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
  */
-#ifndef XEN
 #define KERNEL_START            __IA64_UL_CONST(0xa000000100000000)
 #define PERCPU_ADDR            (-PERCPU_PAGE_SIZE)
-#endif
 
 #ifndef __ASSEMBLY__
 
@@ -188,8 +183,6 @@ do {                                                                \
 
 #ifdef __KERNEL__
 
-#define prepare_to_switch()    do { } while(0)
-
 #ifdef CONFIG_IA32_SUPPORT
 # define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
 #else
@@ -223,7 +216,6 @@ extern void ia64_load_extra (struct task_struct *task);
 # define PERFMON_IS_SYSWIDE() (0)
 #endif
 
-#ifndef XEN
 #define IA64_HAS_EXTRA_STATE(t)                                                        \
        ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)       \
         || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
@@ -236,7 +228,6 @@ extern void ia64_load_extra (struct task_struct *task);
        ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);                    \
        (last) = ia64_switch_to((next));                                                         \
 } while (0)
-#endif 
 
 #ifdef CONFIG_SMP
 /*
@@ -247,9 +238,9 @@ extern void ia64_load_extra (struct task_struct *task);
  */
 # define switch_to(prev,next,last) do {                                                \
        if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {                             \
-               /* ia64_psr(ia64_task_regs(prev))->mfh = 0; */                  \
-               /* (prev)->thread.flags |= IA64_THREAD_FPH_VALID; */                    \
-               /* __ia64_save_fpu((prev)->thread.fph); */                              \
+               ia64_psr(ia64_task_regs(prev))->mfh = 0;                        \
+               (prev)->thread.flags |= IA64_THREAD_FPH_VALID;                  \
+               __ia64_save_fpu((prev)->thread.fph);                            \
        }                                                                       \
        __switch_to(prev, next, last);                                          \
 } while (0)
@@ -281,19 +272,20 @@ extern void ia64_load_extra (struct task_struct *task);
  * of that CPU which will not be released, because there we wait for the
  * tasklist_lock to become available.
  */
-#define prepare_arch_switch(rq, next)          \
-do {                                           \
-       spin_lock(&(next)->switch_lock);        \
-       spin_unlock(&(rq)->lock);               \
-} while (0)
-#define finish_arch_switch(rq, prev)   spin_unlock_irq(&(prev)->switch_lock)
-#define task_running(rq, p)            ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#define __ARCH_WANT_UNLOCKED_CTXSW
 
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
+
+#define arch_align_stack(x) (x)
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
 
+#ifdef XEN
+#include <asm/xensystem.h>
+#endif
+
 #endif /* _ASM_IA64_SYSTEM_H */
index 3cab2a5af2b57578e88cd73c134275400acbdbda..a53253f8d2796ff128a206b45a4d86fbe8c1f250 100644 (file)
@@ -37,6 +37,7 @@ static inline void
 local_finish_flush_tlb_mm (struct mm_struct *mm)
 {
 #ifndef XEN
+// FIXME SMP?
        if (mm == current->active_mm)
                activate_context(mm);
 #endif
@@ -54,6 +55,7 @@ flush_tlb_mm (struct mm_struct *mm)
                return;
 
 #ifndef XEN
+// FIXME SMP?
        mm->context = 0;
 #endif
 
@@ -81,6 +83,7 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
        if (vma->vm_mm == current->active_mm)
                ia64_ptcl(addr, (PAGE_SHIFT << 2));
 #ifndef XEN
+// FIXME SMP?
        else
                vma->vm_mm->context = 0;
 #endif
index 71b19cbc637f7452614b3a5fd32c083f61b558d4..d2c04d3f6a948bacd590cff74c96785b45c39d56 100644 (file)
@@ -1,12 +1,5 @@
 #ifndef _ASM_IA64_TYPES_H
 #define _ASM_IA64_TYPES_H
-#ifdef XEN
-#ifndef __ASSEMBLY__
-typedef unsigned long ssize_t;
-typedef unsigned long size_t;
-typedef long long loff_t;
-#endif
-#endif
 
 /*
  * This file is never included by application software unless explicitly requested (e.g.,
@@ -68,28 +61,6 @@ typedef __u32 u32;
 typedef __s64 s64;
 typedef __u64 u64;
 
-#ifdef XEN
-/*
- * Below are truly Linux-specific types that should never collide with
- * any application/library that wants linux/types.h.
- */
-
-#ifdef __CHECKER__
-#define __bitwise __attribute__((bitwise))
-#else
-#define __bitwise
-#endif
-
-typedef __u16 __bitwise __le16;
-typedef __u16 __bitwise __be16;
-typedef __u32 __bitwise __le32;
-typedef __u32 __bitwise __be32;
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
-typedef __u64 __bitwise __le64;
-typedef __u64 __bitwise __be64;
-#endif
-#endif
-
 #define BITS_PER_LONG 64
 
 /* DMA addresses are 64-bits wide, in general.  */
@@ -101,4 +72,8 @@ typedef unsigned short kmem_bufctl_t;
 # endif /* __KERNEL__ */
 #endif /* !__ASSEMBLY__ */
 
+#ifdef XEN
+#include <asm/xentypes.h>
+#endif
+
 #endif /* _ASM_IA64_TYPES_H */
index 607faf21518051257fe881e090d90714a1589ffd..e206565fd5d066f5455cc838dcc9abcedd481160 100644 (file)
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-#ifdef CONFIG_VTI
-#include <asm/vmx_uaccess.h>
-#else // CONFIG_VTI
-
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
 
 #include <asm/intrinsics.h>
 #include <asm/pgtable.h>
+#include <asm/io.h>
 
 /*
  * For historical reasons, the following macros are grossly misnamed:
@@ -65,7 +64,6 @@
  * point inside the virtually mapped linear page table.
  */
 #ifdef XEN
-/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
 #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
 #define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr)))
 #else
@@ -79,7 +77,8 @@
 #endif
 #define access_ok(type, addr, size)    __access_ok((addr), (size), get_fs())
 
-static inline int
+/* this function will go away soon - use access_ok() instead */
+static inline int __deprecated
 verify_area (int type, const void __user *addr, unsigned long size)
 {
        return access_ok(type, addr, size) ? 0 : -EFAULT;
@@ -353,7 +352,6 @@ extern unsigned long __strnlen_user (const char __user *, long);
        __su_ret;                                               \
 })
 
-#endif // CONFIG_VTI
 /* Generic code can't deal with the location-relative format that we use for compactness.  */
 #define ARCH_HAS_SORT_EXTABLE
 #define ARCH_HAS_SEARCH_EXTABLE
@@ -378,4 +376,40 @@ ia64_done_with_exception (struct pt_regs *regs)
        return 0;
 }
 
+#ifndef XEN
+#define ARCH_HAS_TRANSLATE_MEM_PTR     1
+static __inline__ char *
+xlate_dev_mem_ptr (unsigned long p)
+{
+       struct page *page;
+       char * ptr;
+
+       page = pfn_to_page(p >> PAGE_SHIFT);
+       if (PageUncached(page))
+               ptr = (char *)p + __IA64_UNCACHED_OFFSET;
+       else
+               ptr = __va(p);
+
+       return ptr;
+}
+
+/*
+ * Convert a virtual cached kernel memory pointer to an uncached pointer
+ */
+static __inline__ char *
+xlate_dev_kmem_ptr (char * p)
+{
+       struct page *page;
+       char * ptr;
+
+       page = virt_to_page((unsigned long)p >> PAGE_SHIFT);
+       if (PageUncached(page))
+               ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
+       else
+               ptr = p;
+
+       return ptr;
+}
+#endif
+
 #endif /* _ASM_IA64_UACCESS_H */
index 1aef8198e140cf04a7974fbfc98b1992f98aaf92..46e552641f49f6feb71207cc63428c33849c9eec 100644 (file)
@@ -10,6 +10,8 @@
  *
  * For details of cpumask_scnprintf() and cpumask_parse(),
  * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
+ * For details of cpulist_scnprintf() and cpulist_parse(), see
+ * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
  *
  * The available cpumask operations are:
  *
@@ -46,6 +48,8 @@
  *
  * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
  * int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask
+ * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
+ * int cpulist_parse(buf, map)         Parse ascii string as cpulist
  *
  * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
  *
@@ -268,14 +272,28 @@ static inline int __cpumask_scnprintf(char *buf, int len,
        return bitmap_scnprintf(buf, len, srcp->bits, nbits);
 }
 
-#define cpumask_parse(ubuf, ulen, src) \
-                       __cpumask_parse((ubuf), (ulen), &(src), NR_CPUS)
+#define cpumask_parse(ubuf, ulen, dst) \
+                       __cpumask_parse((ubuf), (ulen), &(dst), NR_CPUS)
 static inline int __cpumask_parse(const char __user *buf, int len,
                                        cpumask_t *dstp, int nbits)
 {
        return bitmap_parse(buf, len, dstp->bits, nbits);
 }
 
+#define cpulist_scnprintf(buf, len, src) \
+                       __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
+static inline int __cpulist_scnprintf(char *buf, int len,
+                                       const cpumask_t *srcp, int nbits)
+{
+       return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
+}
+
+#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
+static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
+{
+       return bitmap_parselist(buf, dstp->bits, nbits);
+}
+
 #if NR_CPUS > 1
 #define for_each_cpu_mask(cpu, mask)           \
        for ((cpu) = first_cpu(mask);           \
index 31b91d845b2ec691d032544b4bc9f497952c6675..24314917263745df4c6debddd350accefcd90706 100644 (file)
@@ -2,6 +2,7 @@
 #define LINUX_HARDIRQ_H
 
 #include <linux/config.h>
+#include <linux/preempt.h>
 #include <linux/smp_lock.h>
 #include <asm/hardirq.h>
 #include <asm/system.h>
 #define __IRQ_MASK(x)  ((1UL << (x))-1)
 
 #define PREEMPT_MASK   (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
-#define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
 #define SOFTIRQ_MASK   (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
 
 #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
 #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
 
+#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
+#ifndef XEN
+#error PREEMPT_ACTIVE is too low!
+#endif
+#endif
+
 #define hardirq_count()        (preempt_count() & HARDIRQ_MASK)
 #define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
 #define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
  */
 #define in_irq()               (hardirq_count())
 #define in_softirq()           (softirq_count())
-#ifndef XEN
-#define in_interrupt()         (irq_count())
+#ifdef XEN
+#define in_interrupt()         0               // FIXME SMP LATER
 #else
-#define in_interrupt()         0               // FIXME LATER
+#define in_interrupt()         (irq_count())
 #endif
 
 #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
index 1f12d7e77946cad7bf47ca2fd69bcd593813608d..caea47d6410554f476243477ae9a42b40b391a05 100644 (file)
@@ -123,7 +123,9 @@ struct softirq_action
 };
 
 asmlinkage void do_softirq(void);
-//extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+#ifndef XEN
+extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+#endif
 extern void softirq_init(void);
 #define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
 extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
diff --git a/xen/include/asm-ia64/xengcc_intrin.h b/xen/include/asm-ia64/xengcc_intrin.h
new file mode 100644 (file)
index 0000000..818fae7
--- /dev/null
@@ -0,0 +1,59 @@
+#ifndef _ASM_IA64_XENGCC_INTRIN_H
+#define _ASM_IA64_XENGCC_INTRIN_H
+/*
+ * Flushrs instruction stream.
+ */
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
+#define ia64_get_rsc()                          \
+({                                  \
+    unsigned long val;                     \
+    asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory");  \
+    val;                               \
+})
+
+#define ia64_set_rsc(val)                       \
+    asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
+
+#define ia64_get_bspstore()     \
+({                                  \
+    unsigned long val;                     \
+    asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory");  \
+    val;                               \
+})
+
+#define ia64_set_bspstore(val)                       \
+    asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
+
+#define ia64_get_rnat()     \
+({                                  \
+    unsigned long val;                     \
+    asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory");  \
+    val;                               \
+})
+
+#define ia64_set_rnat(val)                       \
+    asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
+
+#define ia64_ttag(addr)                                                        \
+({                                                                             \
+       __u64 ia64_intri_res;                                                   \
+       asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));        \
+       ia64_intri_res;                                                         \
+})
+
+#define ia64_get_dcr()                          \
+({                                      \
+    __u64 result;                               \
+    asm volatile ("mov %0=cr.dcr" : "=r"(result) : );           \
+    result;                                 \
+})
+
+#define ia64_set_dcr(val)                           \
+({                                      \
+    asm volatile ("mov cr.dcr=%0" :: "r"(val) );            \
+})
+
+#endif /* _ASM_IA64_XENGCC_INTRIN_H */
diff --git a/xen/include/asm-ia64/xenia64regs.h b/xen/include/asm-ia64/xenia64regs.h
new file mode 100644 (file)
index 0000000..099fc72
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef _ASM_IA64_XENIA64REGS_H
+#define _ASM_IA64_XENIA64REGS_H
+
+#define IA64_REG_CR_DCR   0
+#define IA64_REG_CR_ITM   1
+#define IA64_REG_CR_IVA   2
+#define IA64_REG_CR_PTA   8
+#define IA64_REG_CR_IPSR  16
+#define IA64_REG_CR_ISR   17
+#define IA64_REG_CR_IIP   19
+#define IA64_REG_CR_IFA   20
+#define IA64_REG_CR_ITIR  21
+#define IA64_REG_CR_IIPA  22
+#define IA64_REG_CR_IFS   23
+#define IA64_REG_CR_IIM   24
+#define IA64_REG_CR_IHA   25
+#define IA64_REG_CR_LID   64
+#define IA64_REG_CR_IVR   65
+#define IA64_REG_CR_TPR   66
+#define IA64_REG_CR_EOI   67
+#define IA64_REG_CR_IRR0  68
+#define IA64_REG_CR_IRR1  69
+#define IA64_REG_CR_IRR2  70
+#define IA64_REG_CR_IRR3  71
+#define IA64_REG_CR_ITV   72
+#define IA64_REG_CR_PMV   73
+#define IA64_REG_CR_CMCV  74
+#define IA64_REG_CR_LRR0  80
+#define IA64_REG_CR_LRR1  81
+
+#endif /* _ASM_IA64_XENIA64REGS_H */
diff --git a/xen/include/asm-ia64/xenkregs.h b/xen/include/asm-ia64/xenkregs.h
new file mode 100644 (file)
index 0000000..c2eb14e
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef _ASM_IA64_XENKREGS_H
+#define _ASM_IA64_XENKREGS_H
+
+/*
+ * Translation registers:
+ */
+#define IA64_TR_SHARED_INFO    3       /* dtr3: page shared with domain */
+#define        IA64_TR_VHPT            4       /* dtr4: vhpt */
+#define IA64_TR_ARCH_INFO      5
+
+#ifdef CONFIG_VTI
+#define IA64_TR_VHPT_IN_DOM    5       /* dtr5: Double mapping for vhpt table in domain space */
+#define IA64_TR_XEN_IN_DOM     6       /* itr6, dtr6: Double mapping for xen image in domain space */
+#define IA64_TR_RR7_SWITCH_STUB        7       /* dtr7: mapping for rr7 switch stub */
+#define IA64_TEMP_PHYSICAL     8       /* itr8, dtr8: temp mapping for guest physical memory 256M */
+#endif // CONFIG_VTI
+
+/* Processor status register bits: */
+#define IA64_PSR_VM_BIT                46
+#define IA64_PSR_VM    (__IA64_UL(1) << IA64_PSR_VM_BIT)
+
+/* Interruption Function State */
+#define IA64_IFS_V_BIT         63
+#define IA64_IFS_V     (__IA64_UL(1) << IA64_IFS_V_BIT)
+
+/* Page Table Address */
+#define IA64_PTA_VE_BIT 0
+#define IA64_PTA_SIZE_BIT 2
+#define IA64_PTA_VF_BIT 8
+#define IA64_PTA_BASE_BIT 15
+
+#define IA64_PTA_VE     (__IA64_UL(1) << IA64_PTA_VE_BIT)
+#define IA64_PTA_SIZE   (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
+#define IA64_PTA_VF     (__IA64_UL(1) << IA64_PTA_VF_BIT)
+#define IA64_PTA_BASE   (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
+
+#endif /* _ASM_IA64_XENKREGS_H */
diff --git a/xen/include/asm-ia64/xenpage.h b/xen/include/asm-ia64/xenpage.h
new file mode 100644 (file)
index 0000000..4025eb1
--- /dev/null
@@ -0,0 +1,42 @@
+#ifndef _ASM_IA64_XENPAGE_H
+#define _ASM_IA64_XENPAGE_H
+
+#ifdef CONFIG_DISCONTIGMEM
+#error "xenpage.h: page macros need to be defined for CONFIG_DISCONTIGMEM"
+#endif
+
+#undef pfn_valid
+#undef page_to_pfn
+#undef pfn_to_page
+# define pfn_valid(pfn)                (0)
+# define page_to_pfn(_page)    ((unsigned long) ((_page) - frame_table))
+# define pfn_to_page(_pfn)     (frame_table + (_pfn))
+
+#undef page_to_phys
+#undef virt_to_page
+#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define page_to_virt(_page)    phys_to_virt(page_to_phys(_page))
+#define phys_to_page(kaddr)    pfn_to_page(((kaddr) >> PAGE_SHIFT))
+
+#ifndef __ASSEMBLY__
+typedef union xen_va {
+       struct {
+               unsigned long off : 60;
+               unsigned long reg : 4;
+       } f;
+       unsigned long l;
+       void *p;
+} xen_va;
+#endif
+
+#undef __pa
+#undef __va
+#define __pa(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __va(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+
+#undef PAGE_OFFSET
+#define PAGE_OFFSET    __IA64_UL_CONST(0xf000000000000000)
+
+#endif /* _ASM_IA64_XENPAGE_H */
index 91badbeb513a742e7712a00817b12dcf849c4b26..62c0459360112ee78a2f849b364b3806cd7cbbb1 100644 (file)
@@ -213,4 +213,20 @@ enum {
         ret;                            \
 })
 
+typedef union {
+       struct {
+               __u64 kr0;
+               __u64 kr1;
+               __u64 kr2;
+               __u64 kr3;
+               __u64 kr4;
+               __u64 kr5;
+               __u64 kr6;
+               __u64 kr7;
+       };
+       __u64 _kr[8];
+} cpu_kr_ia64_t;
+
+DECLARE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
+
 #endif // _ASM_IA64_XENPROCESSOR_H
diff --git a/xen/include/asm-ia64/xenspinlock.h b/xen/include/asm-ia64/xenspinlock.h
new file mode 100644 (file)
index 0000000..d383df4
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef _ASM_IA64_XENSPINLOCK_H
+#define _ASM_IA64_XENSPINLOCK_H
+
+/*
+ * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
+ * reentered recursively on the same CPU. All critical regions that may form
+ * part of a recursively-nested set must be protected by these forms. If there
+ * are any critical regions that cannot form part of such a set, they can use
+ * standard spin_[un]lock().
+ */
+#define _raw_spin_lock_recursive(_lock)            \
+    do {                                           \
+        int cpu = smp_processor_id();              \
+        if ( likely((_lock)->recurse_cpu != cpu) ) \
+        {                                          \
+            spin_lock(_lock);                      \
+            (_lock)->recurse_cpu = cpu;            \
+        }                                          \
+        (_lock)->recurse_cnt++;                    \
+    } while ( 0 )
+
+#define _raw_spin_unlock_recursive(_lock)          \
+    do {                                           \
+        if ( likely(--(_lock)->recurse_cnt == 0) ) \
+        {                                          \
+            (_lock)->recurse_cpu = -1;             \
+            spin_unlock(_lock);                    \
+        }                                          \
+    } while ( 0 )
+#endif /*  _ASM_IA64_XENSPINLOCK_H */
index 376f480764f1f0801ead3cfb75ab803af46d02e9..07958f6869e50c43b3b35673509845c129b3edc8 100644 (file)
@@ -22,7 +22,9 @@
 #endif // CONFIG_VTI
 
 #define XEN_START_ADDR          0xf000000000000000
+#undef KERNEL_START
 #define KERNEL_START            0xf000000004000000
+#undef PERCPU_ADDR
 #define PERCPU_ADDR             0xf100000000000000-PERCPU_PAGE_SIZE
 #define SHAREDINFO_ADDR                 0xf100000000000000
 #define VHPT_ADDR               0xf200000000000000
 
 #ifndef __ASSEMBLY__
 
+#undef IA64_HAS_EXTRA_STATE
 #define IA64_HAS_EXTRA_STATE(t) 0
 
+#undef __switch_to
 #ifdef CONFIG_VTI
 extern struct task_struct *vmx_ia64_switch_to (void *next_task);
 #define __switch_to(prev,next,last) do {       \
diff --git a/xen/include/asm-ia64/xentypes.h b/xen/include/asm-ia64/xentypes.h
new file mode 100644 (file)
index 0000000..83ff658
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _ASM_IA64_XENTYPES_H
+#define _ASM_IA64_XENTYPES_H
+
+#ifndef __ASSEMBLY__
+typedef unsigned long ssize_t;
+typedef unsigned long size_t;
+typedef long long loff_t;
+
+#ifdef __KERNEL__
+/* these lines taken from linux/types.h.  they belong in xen/types.h */
+#ifdef __CHECKER__
+#define __bitwise __attribute__((bitwise))
+#else
+#define __bitwise
+#endif
+
+typedef __u16 __bitwise __le16;
+typedef __u16 __bitwise __be16;
+typedef __u32 __bitwise __le32;
+typedef __u32 __bitwise __be32;
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __u64 __bitwise __le64;
+typedef __u64 __bitwise __be64;
+#endif
+
+# endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_XENTYPES_H */